ASSERT(is_idle_domain(v->domain));
percpu_ctxt[smp_processor_id()].curr_vcpu = v;
- cpu_set(smp_processor_id(), v->domain->cpumask);
+ cpu_set(smp_processor_id(), v->domain->domain_dirty_cpumask);
+ cpu_set(smp_processor_id(), v->vcpu_dirty_cpumask);
v->arch.schedule_tail = continue_idle_domain;
reset_stack_and_jump(idle_loop);
}
if ( p->domain != n->domain )
- cpu_set(cpu, n->domain->cpumask);
+ cpu_set(cpu, n->domain->domain_dirty_cpumask);
+ cpu_set(cpu, n->vcpu_dirty_cpumask);
write_ptbase(n);
}
if ( p->domain != n->domain )
- cpu_clear(cpu, p->domain->cpumask);
+ cpu_clear(cpu, p->domain->domain_dirty_cpumask);
+ cpu_clear(cpu, p->vcpu_dirty_cpumask);
percpu_ctxt[cpu].curr_vcpu = n;
}
void sync_vcpu_execstate(struct vcpu *v)
{
- unsigned int cpu = v->processor;
-
- if ( !cpu_isset(cpu, v->domain->cpumask) )
- return;
-
- if ( cpu == smp_processor_id() )
- {
+ if ( cpu_isset(smp_processor_id(), v->vcpu_dirty_cpumask) )
(void)__sync_lazy_execstate();
- }
- else
- {
- /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
- flush_tlb_mask(cpumask_of_cpu(cpu));
- }
+
+ /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
+ flush_tlb_mask(v->vcpu_dirty_cpumask);
}
unsigned long __hypercall_create_continuation(
struct vcpu *v;
unsigned long pfn;
- BUG_ON(!cpus_empty(d->cpumask));
+ BUG_ON(!cpus_empty(d->domain_dirty_cpumask));
ptwr_destroy(d);
* was GDT/LDT) but those circumstances should be
* very rare.
*/
- cpumask_t mask = page_get_owner(page)->cpumask;
+ cpumask_t mask =
+ page_get_owner(page)->domain_dirty_cpumask;
tlbflush_filter(mask, page->tlbflush_timestamp);
if ( unlikely(!cpus_empty(mask)) )
if ( shadow_mode_enabled(d) )
shadow_sync_all(d);
if ( deferred_ops & DOP_FLUSH_ALL_TLBS )
- flush_tlb_mask(d->cpumask);
+ flush_tlb_mask(d->domain_dirty_cpumask);
else
local_flush_tlb();
}
struct domain *d, unsigned long vmask)
{
unsigned int vcpu_id;
- cpumask_t pmask;
+ cpumask_t pmask = CPU_MASK_NONE;
struct vcpu *v;
while ( vmask != 0 )
vmask &= ~(1UL << vcpu_id);
if ( (vcpu_id < MAX_VIRT_CPUS) &&
((v = d->vcpu[vcpu_id]) != NULL) )
- cpu_set(v->processor, pmask);
+ cpus_or(pmask, pmask, v->vcpu_dirty_cpumask);
}
return pmask;
break;
}
pmask = vcpumask_to_pcpumask(d, vmask);
- cpus_and(pmask, pmask, d->cpumask);
if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
flush_tlb_mask(pmask);
else
}
case MMUEXT_TLB_FLUSH_ALL:
- flush_tlb_mask(d->cpumask);
+ flush_tlb_mask(d->domain_dirty_cpumask);
break;
case MMUEXT_INVLPG_ALL:
- flush_tlb_one_mask(d->cpumask, op.arg1.linear_addr);
+ flush_tlb_one_mask(d->domain_dirty_cpumask, op.arg1.linear_addr);
break;
case MMUEXT_FLUSH_CACHE:
local_flush_tlb();
break;
case UVMF_ALL:
- flush_tlb_mask(d->cpumask);
+ flush_tlb_mask(d->domain_dirty_cpumask);
break;
default:
if ( unlikely(get_user(vmask, (unsigned long *)bmap_ptr)) )
rc = -EFAULT;
pmask = vcpumask_to_pcpumask(d, vmask);
- cpus_and(pmask, pmask, d->cpumask);
flush_tlb_mask(pmask);
break;
}
local_flush_tlb_one(va);
break;
case UVMF_ALL:
- flush_tlb_one_mask(d->cpumask, va);
+ flush_tlb_one_mask(d->domain_dirty_cpumask, va);
break;
default:
if ( unlikely(get_user(vmask, (unsigned long *)bmap_ptr)) )
rc = -EFAULT;
pmask = vcpumask_to_pcpumask(d, vmask);
- cpus_and(pmask, pmask, d->cpumask);
flush_tlb_one_mask(pmask, va);
break;
}
/* Ensure that there are no stale writable mappings in any TLB. */
/* NB. INVLPG is a serialising instruction: flushes pending updates. */
- flush_tlb_one_mask(d->cpumask, l1va);
+ flush_tlb_one_mask(d->domain_dirty_cpumask, l1va);
PTWR_PRINTK("[%c] disconnected_l1va at %p now %"PRIpte"\n",
PTWR_PRINT_WHICH, ptep, pte.l1);
if ( which == PTWR_PT_ACTIVE )
{
l2e_remove_flags(*pl2e, _PAGE_PRESENT);
- flush_tlb_mask(d->cpumask);
+ flush_tlb_mask(d->domain_dirty_cpumask);
}
/* Temporarily map the L1 page, and make a copy of it. */
}
/* Other VCPUs mustn't use the revoked writable mappings. */
- other_vcpus_mask = d->cpumask;
+ other_vcpus_mask = d->domain_dirty_cpumask;
cpu_clear(smp_processor_id(), other_vcpus_mask);
flush_tlb_mask(other_vcpus_mask);
}
/* Other VCPUs mustn't use the revoked writable mappings. */
- other_vcpus_mask = d->cpumask;
+ other_vcpus_mask = d->domain_dirty_cpumask;
cpu_clear(smp_processor_id(), other_vcpus_mask);
flush_tlb_mask(other_vcpus_mask);
/* Make sure that every vcpu is descheduled before we finalise. */
for_each_vcpu ( d, v )
vcpu_sleep_sync(v);
- BUG_ON(!cpus_empty(d->cpumask));
+ BUG_ON(!cpus_empty(d->domain_dirty_cpumask));
sync_pagetable_state(d);
for ( i = 0; i < count; i++ )
(void)__gnttab_unmap_grant_ref(&uop[i]);
- flush_tlb_mask(current->domain->cpumask);
+ flush_tlb_mask(current->domain->domain_dirty_cpumask);
return 0;
}
shadow_drop_references(d, &pg[i]);
ASSERT((pg[i].u.inuse.type_info & PGT_count_mask) == 0);
pg[i].tlbflush_timestamp = tlbflush_current_time();
- pg[i].u.free.cpumask = d->cpumask;
+ pg[i].u.free.cpumask = d->domain_dirty_cpumask;
list_del(&pg[i].list);
}
do {
succ = 0;
__clear_cpu_bits(have_lock);
- for_each_vcpu(d, v) {
+ for_each_vcpu ( d, v )
+ {
cpu = v->processor;
- if (!__get_cpu_bit(cpu, have_lock)) {
+ if ( !__get_cpu_bit(cpu, have_lock) )
+ {
/* if we don't have a lock on this CPU: acquire it*/
- if (spin_trylock(&schedule_data[cpu].schedule_lock)) {
+ if ( spin_trylock(&schedule_data[cpu].schedule_lock) )
+ {
/*we have this lock!*/
__set_cpu_bit(cpu, have_lock);
succ = 1;
- } else {
+ }
+ else
+ {
/*we didn,t get this lock -> free all other locks too!*/
- for (cpu = 0; cpu < NR_CPUS; cpu++)
- if (__get_cpu_bit(cpu, have_lock))
+ for ( cpu = 0; cpu < NR_CPUS; cpu++ )
+ if ( __get_cpu_bit(cpu, have_lock) )
spin_unlock(&schedule_data[cpu].schedule_lock);
/* and start from the beginning! */
succ = 0;
SCHED_OP(adjdom, d, cmd);
- for (cpu = 0; cpu < NR_CPUS; cpu++)
- if (__get_cpu_bit(cpu, have_lock))
+ for ( cpu = 0; cpu < NR_CPUS; cpu++ )
+ if ( __get_cpu_bit(cpu, have_lock) )
spin_unlock(&schedule_data[cpu].schedule_lock);
__clear_cpu_bits(have_lock);
*/
static void __enter_scheduler(void)
{
- struct vcpu *prev = current, *next = NULL;
- int cpu = prev->processor;
+ struct vcpu *prev = current, *next = NULL;
+ int cpu = smp_processor_id();
s_time_t now;
struct task_slice next_slice;
s32 r_time; /* time for new dom to run */
static void t_timer_fn(void *unused)
{
struct vcpu *v = current;
- unsigned int cpu = v->processor;
+ unsigned int cpu = smp_processor_id();
schedule_data[cpu].tick++;
#ifdef CONFIG_X86_HT
extern void detect_ht(struct cpuinfo_x86 *c);
#else
-static inline void detect_ht(struct cpuinfo_x86 *c) {}
+static always_inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif
/*
/*
* CPUID functions returning a single datum
*/
-static inline unsigned int cpuid_eax(unsigned int op)
+static always_inline unsigned int cpuid_eax(unsigned int op)
{
unsigned int eax;
: "bx", "cx", "dx");
return eax;
}
-static inline unsigned int cpuid_ebx(unsigned int op)
+static always_inline unsigned int cpuid_ebx(unsigned int op)
{
unsigned int eax, ebx;
: "cx", "dx" );
return ebx;
}
-static inline unsigned int cpuid_ecx(unsigned int op)
+static always_inline unsigned int cpuid_ecx(unsigned int op)
{
unsigned int eax, ecx;
: "bx", "dx" );
return ecx;
}
-static inline unsigned int cpuid_edx(unsigned int op)
+static always_inline unsigned int cpuid_edx(unsigned int op)
{
unsigned int eax, edx;
*/
extern unsigned long mmu_cr4_features;
-static inline void set_in_cr4 (unsigned long mask)
+static always_inline void set_in_cr4 (unsigned long mask)
{
unsigned long dummy;
mmu_cr4_features |= mask;
: "=&r" (dummy) : "irg" (mask) );
}
-static inline void clear_in_cr4 (unsigned long mask)
+static always_inline void clear_in_cr4 (unsigned long mask)
{
unsigned long dummy;
mmu_cr4_features &= ~mask;
outb((data), 0x23); \
} while (0)
-static inline void __monitor(const void *eax, unsigned long ecx,
+static always_inline void __monitor(const void *eax, unsigned long ecx,
unsigned long edx)
{
/* "monitor %eax,%ecx,%edx;" */
: :"a" (eax), "c" (ecx), "d"(edx));
}
-static inline void __mwait(unsigned long eax, unsigned long ecx)
+static always_inline void __mwait(unsigned long eax, unsigned long ecx)
{
/* "mwait %eax,%ecx;" */
asm volatile(
};
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
+static always_inline void rep_nop(void)
{
__asm__ __volatile__ ( "rep;nop" : : : "memory" );
}
#ifdef CONFIG_MPENTIUMIII
#define ARCH_HAS_PREFETCH
-extern inline void prefetch(const void *x)
+extern always_inline void prefetch(const void *x)
{
__asm__ __volatile__ ("prefetchnta (%0)" : : "r"(x));
}
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
-extern inline void prefetch(const void *x)
+extern always_inline void prefetch(const void *x)
{
__asm__ __volatile__ ("prefetch (%0)" : : "r"(x));
}
-extern inline void prefetchw(const void *x)
+extern always_inline void prefetchw(const void *x)
{
__asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
}
if ( need_flush )
{
perfc_incrc(update_hl2e_invlpg);
- flush_tlb_one_mask(v->domain->cpumask,
+ flush_tlb_one_mask(v->domain->domain_dirty_cpumask,
&linear_pg_table[l1_linear_offset(va)]);
}
}
atomic_t pausecnt;
+ /* Bitmask of CPUs on which this VCPU may run. */
cpumask_t cpu_affinity;
+ /* Bitmask of CPUs which are holding onto this VCPU's state. */
+ cpumask_t vcpu_dirty_cpumask;
+
struct arch_vcpu arch;
};
struct vcpu *vcpu[MAX_VIRT_CPUS];
/* Bitmask of CPUs which are holding onto this domain's state. */
- cpumask_t cpumask;
+ cpumask_t domain_dirty_cpumask;
struct arch_domain arch;